import numpy as np
Initializing state and weights. Each neuron $i$ has two states: $V_i=0$ ("not firing") and $V_i=1$ ("firing at maximum rate").
# Number of neurons in the Hopfield network.
NODES = 20
# Random initial state: V[i] = 0 ("not firing") or 1 ("firing at maximum rate").
V = np.random.choice([0,1], [NODES])
V
array([0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0])
The strength of connection is defined as $T_{ij}$.
# Symmetric connection-strength matrix with no self-coupling: draw Gaussian
# weights, keep only the strict upper triangle, and mirror it below the
# diagonal so that T[i, j] == T[j, i] and T[i, i] == 0.
T = np.random.randn(NODES, NODES)
upper = np.triu(T, 1)
T = upper + upper.T
T
array([[ 0. , 0.32448288, -0.01523121, -0.05460559, -0.79290377,
-0.64528411, -0.64388326, 1.53996944, 1.6401477 , -1.03999215,
0.47278382, 0.958597 , 0.98529905, 0.24965009, 0.39080888,
-0.33024704, 0.29648248, 0.28946463, 0.18535756, -1.64831124],
[ 0.32448288, 0. , -0.5045555 , -0.32767569, 0.66522988,
-0.02438141, -0.76430824, 1.76584281, 0.95015331, 1.10867423,
-0.46136761, -0.0799547 , 1.38111846, 0.6069735 , 0.7426257 ,
2.06222843, -1.28601884, -0.43027503, 1.0745016 , 1.14218609],
[-0.01523121, -0.5045555 , 0. , -1.86708463, -0.29156637,
-0.40825585, -0.27199738, -1.47483975, 1.19663517, -0.68049641,
-1.43080221, -0.38010853, -0.4685491 , -1.24981974, -1.43119089,
0.18848771, 1.3221258 , 1.22566142, -0.88669619, -1.17218601],
[-0.05460559, -0.32767569, -1.86708463, 0. , -0.91164767,
0.1320975 , -2.50222809, -0.55089098, 0.22838875, -0.16929988,
0.34242212, 0.21749526, 2.56405437, -0.36000213, 0.39941703,
-1.0417753 , 0.45706567, 0.55566071, 1.25062026, 0.08562102],
[-0.79290377, 0.66522988, -0.29156637, -0.91164767, 0. ,
-0.63932774, -0.93879502, 1.34655189, 0.78013861, -0.50314177,
-0.1928367 , -0.37131436, -0.44045977, -1.46771633, 0.33265387,
-1.12021218, -0.33367198, -1.28148312, -1.17646323, 0.38469843],
[-0.64528411, -0.02438141, -0.40825585, 0.1320975 , -0.63932774,
0. , -1.12041374, -1.92529911, -0.89435769, -0.30232787,
-0.76213036, -1.81449063, -0.63997384, -0.15632013, 1.12612522,
0.06973163, 0.49385145, 1.80072647, 0.82912765, -0.35637499],
[-0.64388326, -0.76430824, -0.27199738, -2.50222809, -0.93879502,
-1.12041374, 0. , -0.10898911, -0.33851785, 0.18896533,
-1.68461777, -0.44667635, 1.14145999, -0.69770235, -0.05484314,
0.74305574, -1.09259712, 0.0287127 , 0.85772374, -0.52614756],
[ 1.53996944, 1.76584281, -1.47483975, -0.55089098, 1.34655189,
-1.92529911, -0.10898911, 0. , -1.73537529, 0.74035117,
-0.80572297, -0.98696028, -1.33139481, -0.90676423, -1.12931918,
-1.79039997, -0.57157213, 0.29478115, 0.12701931, 0.51005748],
[ 1.6401477 , 0.95015331, 1.19663517, 0.22838875, 0.78013861,
-0.89435769, -0.33851785, -1.73537529, 0. , 1.01604224,
-0.68061442, 1.47152205, -0.95772334, 0.20436532, -0.73984666,
0.3421559 , -0.10680158, -0.92652856, -0.3975525 , 0.6340511 ],
[-1.03999215, 1.10867423, -0.68049641, -0.16929988, -0.50314177,
-0.30232787, 0.18896533, 0.74035117, 1.01604224, 0. ,
0.37086984, 1.77562897, 1.22723384, -2.32112104, -0.24291038,
0.80301586, -1.59239404, 1.10570557, 1.25748806, 1.0377024 ],
[ 0.47278382, -0.46136761, -1.43080221, 0.34242212, -0.1928367 ,
-0.76213036, -1.68461777, -0.80572297, -0.68061442, 0.37086984,
0. , -0.29619044, -0.23046812, -0.92854874, 0.55347938,
0.1566399 , -2.30682151, -1.20988343, 0.90051501, 1.14623275],
[ 0.958597 , -0.0799547 , -0.38010853, 0.21749526, -0.37131436,
-1.81449063, -0.44667635, -0.98696028, 1.47152205, 1.77562897,
-0.29619044, 0. , 0.26602843, -0.02721663, 1.17472809,
-0.04165756, -0.01505654, -1.06096063, -0.34812999, 0.86702566],
[ 0.98529905, 1.38111846, -0.4685491 , 2.56405437, -0.44045977,
-0.63997384, 1.14145999, -1.33139481, -0.95772334, 1.22723384,
-0.23046812, 0.26602843, 0. , -0.31262252, 0.67711341,
0.12322215, -0.41654233, 2.12700724, 0.37191993, 1.00363008],
[ 0.24965009, 0.6069735 , -1.24981974, -0.36000213, -1.46771633,
-0.15632013, -0.69770235, -0.90676423, 0.20436532, -2.32112104,
-0.92854874, -0.02721663, -0.31262252, 0. , 0.90994125,
-1.42411758, 0.51315239, 0.22000019, 0.40156278, -0.13446356],
[ 0.39080888, 0.7426257 , -1.43119089, 0.39941703, 0.33265387,
1.12612522, -0.05484314, -1.12931918, -0.73984666, -0.24291038,
0.55347938, 1.17472809, 0.67711341, 0.90994125, 0. ,
-0.57643464, -1.3825812 , -0.14828928, 1.86505754, 1.29756848],
[-0.33024704, 2.06222843, 0.18848771, -1.0417753 , -1.12021218,
0.06973163, 0.74305574, -1.79039997, 0.3421559 , 0.80301586,
0.1566399 , -0.04165756, 0.12322215, -1.42411758, -0.57643464,
0. , -1.18457616, 0.81031006, 0.65608814, -0.03058778],
[ 0.29648248, -1.28601884, 1.3221258 , 0.45706567, -0.33367198,
0.49385145, -1.09259712, -0.57157213, -0.10680158, -1.59239404,
-2.30682151, -0.01505654, -0.41654233, 0.51315239, -1.3825812 ,
-1.18457616, 0. , -0.09261677, 1.13403681, 0.60349133],
[ 0.28946463, -0.43027503, 1.22566142, 0.55566071, -1.28148312,
1.80072647, 0.0287127 , 0.29478115, -0.92652856, 1.10570557,
-1.20988343, -1.06096063, 2.12700724, 0.22000019, -0.14828928,
0.81031006, -0.09261677, 0. , -2.11621334, 0.34983359],
[ 0.18535756, 1.0745016 , -0.88669619, 1.25062026, -1.17646323,
0.82912765, 0.85772374, 0.12701931, -0.3975525 , 1.25748806,
0.90051501, -0.34812999, 0.37191993, 0.40156278, 1.86505754,
0.65608814, 1.13403681, -2.11621334, 0. , 1.15873561],
[-1.64831124, 1.14218609, -1.17218601, 0.08562102, 0.38469843,
-0.35637499, -0.52614756, 0.51005748, 0.6340511 , 1.0377024 ,
1.14623275, 0.86702566, 1.00363008, -0.13446356, 1.29756848,
-0.03058778, 0.60349133, 0.34983359, 1.15873561, 0. ]])
The instantaneous state of the system is specified by listing the $N$ values of $V_i$, so it is represented by a binary word of $N$ bits.
The state changes in time according to the following algorithm. For each neuron $i$ there is a fixed threshold $U_i$. (Unless otherwise stated, we choose $U_i = 0$).
# Per-neuron firing thresholds U_i; all zero, as stated above.
U = np.zeros([NODES])
U
array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0.])
Each neuron readjusts its state randomly in time but with a mean attempt rate W, setting
$$ V_i \to 1 \quad \text{if} \quad \sum_{j \neq i} T_{ij}V_j \geq U_i $$
and
$$ V_i \to 0 \quad \text{if} \quad \sum_{j \neq i} T_{ij}V_j < U_i $$
Thus, each neuron randomly and asynchronously evaluates whether it is above or below threshold and readjusts accordingly.
Suppose we wish to store the set of states $V^s$, $s = 1 \cdots n$. We use the storage prescription
$$ T_{ij} = \sum_s (2V_i^s - 1)(2V_j^s - 1) $$
but with $T_{ii}=0$.
Alright, let's try this out by storing a single random $N$-bit example state $V^s$. Then
# Store one random state via the Hebbian prescription
# T_ij = (2 V_i - 1)(2 V_j - 1) with T_ii = 0.
# Vectorized with np.outer instead of the original O(N^2) Python double
# loop; this produces exactly the same float matrix, and matches the
# np.outer form already used later in the notebook.
V_example = np.random.choice([0,1], [NODES])
bipolar = 2 * V_example - 1          # map {0, 1} -> {-1, +1}
T_example = np.outer(bipolar, bipolar).astype(float)
np.fill_diagonal(T_example, 0)       # no self-coupling
T_example
array([[ 0., -1., -1., -1., 1., -1., -1., -1., 1., -1., 1., -1., -1.,
1., 1., -1., 1., 1., 1., -1.],
[-1., 0., 1., 1., -1., 1., 1., 1., -1., 1., -1., 1., 1.,
-1., -1., 1., -1., -1., -1., 1.],
[-1., 1., 0., 1., -1., 1., 1., 1., -1., 1., -1., 1., 1.,
-1., -1., 1., -1., -1., -1., 1.],
[-1., 1., 1., 0., -1., 1., 1., 1., -1., 1., -1., 1., 1.,
-1., -1., 1., -1., -1., -1., 1.],
[ 1., -1., -1., -1., 0., -1., -1., -1., 1., -1., 1., -1., -1.,
1., 1., -1., 1., 1., 1., -1.],
[-1., 1., 1., 1., -1., 0., 1., 1., -1., 1., -1., 1., 1.,
-1., -1., 1., -1., -1., -1., 1.],
[-1., 1., 1., 1., -1., 1., 0., 1., -1., 1., -1., 1., 1.,
-1., -1., 1., -1., -1., -1., 1.],
[-1., 1., 1., 1., -1., 1., 1., 0., -1., 1., -1., 1., 1.,
-1., -1., 1., -1., -1., -1., 1.],
[ 1., -1., -1., -1., 1., -1., -1., -1., 0., -1., 1., -1., -1.,
1., 1., -1., 1., 1., 1., -1.],
[-1., 1., 1., 1., -1., 1., 1., 1., -1., 0., -1., 1., 1.,
-1., -1., 1., -1., -1., -1., 1.],
[ 1., -1., -1., -1., 1., -1., -1., -1., 1., -1., 0., -1., -1.,
1., 1., -1., 1., 1., 1., -1.],
[-1., 1., 1., 1., -1., 1., 1., 1., -1., 1., -1., 0., 1.,
-1., -1., 1., -1., -1., -1., 1.],
[-1., 1., 1., 1., -1., 1., 1., 1., -1., 1., -1., 1., 0.,
-1., -1., 1., -1., -1., -1., 1.],
[ 1., -1., -1., -1., 1., -1., -1., -1., 1., -1., 1., -1., -1.,
0., 1., -1., 1., 1., 1., -1.],
[ 1., -1., -1., -1., 1., -1., -1., -1., 1., -1., 1., -1., -1.,
1., 0., -1., 1., 1., 1., -1.],
[-1., 1., 1., 1., -1., 1., 1., 1., -1., 1., -1., 1., 1.,
-1., -1., 0., -1., -1., -1., 1.],
[ 1., -1., -1., -1., 1., -1., -1., -1., 1., -1., 1., -1., -1.,
1., 1., -1., 0., 1., 1., -1.],
[ 1., -1., -1., -1., 1., -1., -1., -1., 1., -1., 1., -1., -1.,
1., 1., -1., 1., 0., 1., -1.],
[ 1., -1., -1., -1., 1., -1., -1., -1., 1., -1., 1., -1., -1.,
1., 1., -1., 1., 1., 0., -1.],
[-1., 1., 1., 1., -1., 1., 1., 1., -1., 1., -1., 1., 1.,
-1., -1., 1., -1., -1., -1., 0.]])
So now T_example is the connection strength between each of the neurons. Let's see if the network can retrieve the stored state.
In order to retrieve the stored state, we need to update the state of each neuron asynchronously.
Here's the algorithm:
- Choose a neuron $i$ at random.
- Compute the activation $a_i = \sum_{j \neq i} T_{ij}V_j$
- If $a_i \geq U_i$, set $V_i = 1$
- If $a_i < U_i$, set $V_i = 0$
- Go back to step 1.
def retrieve_state(T, V, U=0, max_iterations=1000):
    """Run asynchronous Hopfield updates on V until the state is stable.

    Each step picks a neuron i uniformly at random and sets
    V[i] = 1 if sum_j T[i, j] * V[j] >= U_i else 0.
    V is modified in place and also returned.

    Parameters:
    T: (n, n) connection-strength matrix (diagonal assumed zero)
    V: (n,) array of 0/1 states; updated in place
    U: scalar threshold, or (n,) array of per-neuron thresholds
    max_iterations: cap on single-neuron update attempts

    Returns:
    V, after updating (returned even if not converged).

    Fixes two defects in the earlier version:
    * it read the global NODES instead of len(V), so it broke for any
      network of a different size;
    * it declared convergence as soon as one randomly chosen neuron
      happened not to change state, which terminates almost immediately.
      Convergence now means every neuron is simultaneously stable.
    """
    n = len(V)
    # Accept a scalar or a per-neuron threshold vector.
    thresholds = np.broadcast_to(np.asarray(U), (n,))
    for _ in range(max_iterations):
        i = np.random.randint(0, n)
        a_i = np.sum(T[i, :] * V)
        V[i] = 1 if a_i >= thresholds[i] else 0
        # Converged only when no neuron would change under an update.
        stable = (T @ V >= thresholds).astype(V.dtype)
        if np.array_equal(stable, V):
            return V
    return V  # not converged within max_iterations
print("target state:", V_example)
# Corrupt one bit of the stored state.  The previous version set
# V_example[0] = 0, which is a no-op whenever that bit is already 0
# (exactly what the recorded output shows: "noisy" equals "target").
# Flipping the bit guarantees the probe really is noisy.
V_example[0] = 1 - V_example[0]
print("noisy target state:", V_example)
# NOTE: retrieve_state updates V_example in place as well as returning it.
print("retrieved state:", retrieve_state(T_example, V_example))
target state: [0 1 1 1 0 1 1 1 0 1 0 1 1 0 0 1 0 0 0 1] noisy target state: [0 1 1 1 0 1 1 1 0 1 0 1 1 0 0 1 0 0 0 1] retrieved state: [0 1 1 1 0 1 1 1 0 1 0 1 1 0 0 1 0 0 0 1]
Okay, so the network is able to retrieve the stored state even with some noise.
Now let's see if the network can retrieve a state that is not stored.
# Probe the network with a fresh random state that was never stored.
target_state = np.random.choice([0,1], [NODES])
print("target state:", target_state)
# NOTE(review): an unstored random probe is not an attractor of the
# stored weights, so there is no reason to expect reliable retrieval here.
print("retrieved state:", retrieve_state(T_example, target_state))
target state: [0 0 1 0 0 1 1 0 0 1 1 0 0 1 1 1 0 0 0 1] retrieved state: [0 0 1 0 0 1 1 0 0 1 1 0 0 1 1 1 0 0 0 1]
Since this state was never stored, the network has no attractor built for it, so retrieval is unreliable. But if we add this vector to the stored vectors, we can retrieve the target state.
# Add the new pattern to the weights: Hebbian outer-product increment
# with the self-coupling (diagonal) zeroed out.
bipolar = (2 * target_state) - 1
storage_prescription = np.outer(bipolar, bipolar)
np.fill_diagonal(storage_prescription, 0)
T_example += storage_prescription
print("updated T:", T_example)
print("target state:", target_state)
print("retrieved state:", retrieve_state(T_example, target_state))
updated T: [[ 0. 0. -2. 0. 2. -2. -2. 0. 2. -2. 0. 0. 0. 0. 0. -2. 2. 2. 2. -2.] [ 0. 0. 0. 2. 0. 0. 0. 2. 0. 0. -2. 2. 2. -2. -2. 0. 0. 0. 0. 0.] [-2. 0. 0. 0. -2. 2. 2. 0. -2. 2. 0. 0. 0. 0. 0. 2. -2. -2. -2. 2.] [ 0. 2. 0. 0. 0. 0. 0. 2. 0. 0. -2. 2. 2. -2. -2. 0. 0. 0. 0. 0.] [ 2. 0. -2. 0. 0. -2. -2. 0. 2. -2. 0. 0. 0. 0. 0. -2. 2. 2. 2. -2.] [-2. 0. 2. 0. -2. 0. 2. 0. -2. 2. 0. 0. 0. 0. 0. 2. -2. -2. -2. 2.] [-2. 0. 2. 0. -2. 2. 0. 0. -2. 2. 0. 0. 0. 0. 0. 2. -2. -2. -2. 2.] [ 0. 2. 0. 2. 0. 0. 0. 0. 0. 0. -2. 2. 2. -2. -2. 0. 0. 0. 0. 0.] [ 2. 0. -2. 0. 2. -2. -2. 0. 0. -2. 0. 0. 0. 0. 0. -2. 2. 2. 2. -2.] [-2. 0. 2. 0. -2. 2. 2. 0. -2. 0. 0. 0. 0. 0. 0. 2. -2. -2. -2. 2.] [ 0. -2. 0. -2. 0. 0. 0. -2. 0. 0. 0. -2. -2. 2. 2. 0. 0. 0. 0. 0.] [ 0. 2. 0. 2. 0. 0. 0. 2. 0. 0. -2. 0. 2. -2. -2. 0. 0. 0. 0. 0.] [ 0. 2. 0. 2. 0. 0. 0. 2. 0. 0. -2. 2. 0. -2. -2. 0. 0. 0. 0. 0.] [ 0. -2. 0. -2. 0. 0. 0. -2. 0. 0. 2. -2. -2. 0. 2. 0. 0. 0. 0. 0.] [ 0. -2. 0. -2. 0. 0. 0. -2. 0. 0. 2. -2. -2. 2. 0. 0. 0. 0. 0. 0.] [-2. 0. 2. 0. -2. 2. 2. 0. -2. 2. 0. 0. 0. 0. 0. 0. -2. -2. -2. 2.] [ 2. 0. -2. 0. 2. -2. -2. 0. 2. -2. 0. 0. 0. 0. 0. -2. 0. 2. 2. -2.] [ 2. 0. -2. 0. 2. -2. -2. 0. 2. -2. 0. 0. 0. 0. 0. -2. 2. 0. 2. -2.] [ 2. 0. -2. 0. 2. -2. -2. 0. 2. -2. 0. 0. 0. 0. 0. -2. 2. 2. 0. -2.] [-2. 0. 2. 0. -2. 2. 2. 0. -2. 2. 0. 0. 0. 0. 0. 2. -2. -2. -2. 0.]] target state: [0 0 1 0 0 1 1 0 0 1 1 0 0 1 1 1 0 0 0 1] retrieved state: [0 0 1 0 0 1 1 0 0 1 1 0 0 1 1 1 0 0 0 1]
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from IPython.display import HTML
import matplotlib as mpl
# Raise the embedded-animation size cap so anim.to_jshtml() fits inline.
mpl.rcParams['animation.embed_limit'] = 100000000  # Set to 100MB
def create_network_animation(T, initial_V, U=0, max_iterations=100, interval=200):
    """
    Create an animation of network state evolution with state vector display
    and highlighted edges for the updating node.

    Each frame performs one asynchronous Hopfield update on a randomly
    chosen neuron and redraws the node colors (red = 1, blue = 0), the
    state-vector bar chart, and the edges incident to the updated node.

    Parameters:
    T: adjacency matrix
    initial_V: initial state vector (copied; the caller's array is not mutated)
    U: threshold
    max_iterations: maximum number of iterations (animation frames)
    interval: time between frames in milliseconds

    Returns:
    IPython.display.HTML wrapping the JS/HTML animation.
    """
    nodes = len(T)
    # Create graph from adjacency matrix
    G = nx.from_numpy_array(T)
    pos = nx.spring_layout(G, seed=42)  # Fixed layout
    # Setup the figure and subplots: network on top, state vector below
    fig = plt.figure(figsize=(12, 10))
    gs = fig.add_gridspec(2, 1, height_ratios=[4, 1])
    ax_network = fig.add_subplot(gs[0])
    ax_vector = fig.add_subplot(gs[1])
    # Initialize node colors (red = firing, blue = not firing)
    V = np.copy(initial_V)
    colors = ['red' if v == 1 else 'blue' for v in V]
    # Draw initial network
    nodes_draw = nx.draw_networkx_nodes(G, pos, node_color=colors,
                                        node_size=500, ax=ax_network)
    # Create edge collections for both regular and highlighted edges
    all_edges = list(G.edges())
    regular_edge_collection = nx.draw_networkx_edges(G, pos, ax=ax_network,
                                                     width=1.0, alpha=0.5)
    # Create empty highlighted edge collection
    # NOTE(review): this edge_pos is never used; update() rebuilds its own
    # local edge_pos before calling set_segments — dead code.
    edge_pos = np.array([(pos[u], pos[v]) for u, v in all_edges])
    highlighted_edge_collection = nx.draw_networkx_edges(G, pos, ax=ax_network,
                                                         width=2.5, edge_color='purple',
                                                         alpha=0.0)  # Start with transparent
    # Add node labels
    labels = {i: str(i) for i in range(nodes)}
    nx.draw_networkx_labels(G, pos, labels)
    # Title with iteration counter
    title = ax_network.set_title('Iteration: 0')
    # Initialize vector display
    bar_container = ax_vector.bar(range(nodes), V, color=colors)
    ax_vector.set_ylim(-0.1, 1.1)
    ax_vector.set_xlim(-0.5, nodes-0.5)
    ax_vector.set_xlabel('Node Index')
    ax_vector.set_ylabel('State')
    ax_vector.set_title('State Vector')
    # Add grid for vector display
    ax_vector.grid(True, axis='y', linestyle='--', alpha=0.7)

    def update(frame):
        # V persists across frames; each frame is one asynchronous step.
        nonlocal V
        # Update state: threshold rule on one randomly chosen neuron
        i = np.random.randint(0, nodes)
        a_i = np.sum(T[i, :] * V)
        V[i] = 1 if a_i >= U else 0
        # Update colors
        colors = ['red' if v == 1 else 'blue' for v in V]
        nodes_draw.set_color(colors)
        # Update vector display
        for bar, val, color in zip(bar_container, V, colors):
            bar.set_height(val)
            bar.set_color(color)
        # Update edge highlighting
        # Reset all edges to normal
        regular_edge_collection.set_alpha(0.5)
        # Get edges connected to node i (both directions, since T may be
        # asymmetric in general even though this notebook builds it symmetric)
        connected_edges = [(i, j) for j in range(nodes) if T[i, j] != 0]
        connected_edges.extend([(j, i) for j in range(nodes) if T[j, i] != 0])
        # Update highlighted edges
        if connected_edges:
            edge_pos = np.array([(pos[u], pos[v]) for u, v in connected_edges])
            highlighted_edge_collection.set_segments(edge_pos)
            highlighted_edge_collection.set_alpha(0.8)
        else:
            highlighted_edge_collection.set_alpha(0.0)
        # Update title
        title.set_text(f'Iteration: {frame+1} (Updating Node {i})')
        return (nodes_draw, regular_edge_collection, highlighted_edge_collection,
                *bar_container.patches)

    # Create animation
    anim = FuncAnimation(fig, update, frames=max_iterations,
                         interval=interval, blit=True)
    plt.close()  # Prevent display of static plot
    return HTML(anim.to_jshtml())
# Create and display animation
# Animate the asynchronous dynamics starting from a fresh random state.
animation = create_network_animation(
    T_example, np.random.choice([0, 1], [NODES]), U=0.0
)
display(animation)
# NOTE(review): retrieve_state mutated V_example and target_state in place
# above, so this records their post-dynamics values, not the raw probes.
print("recorded states:", np.array([V_example, target_state]))